llms-py 3.0.0b4__py3-none-any.whl → 3.0.0b5__py3-none-any.whl

This diff compares the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
Binary file (wheel archive); the files it contains are diffed below.
llms/main.py CHANGED
@@ -11,6 +11,7 @@ import asyncio
 import base64
 import hashlib
 import importlib.util
+import inspect
 import json
 import mimetypes
 import os
@@ -26,6 +27,7 @@ from datetime import datetime
 from importlib import resources  # Py≥3.9 (pip install importlib_resources for 3.7/3.8)
 from io import BytesIO
 from pathlib import Path
+from typing import get_type_hints
 from urllib.parse import parse_qs, urlencode

 import aiohttp
@@ -38,7 +40,7 @@ try:
 except ImportError:
     HAS_PIL = False

-VERSION = "3.0.0b4"
+VERSION = "3.0.0b5"
 _ROOT = None
 DEBUG = True  # os.getenv("PYPI_SERVICESTACK") is not None
 MOCK = False
@@ -322,6 +324,35 @@ def convert_image_if_needed(image_bytes, mimetype="image/png"):
     return image_bytes, mimetype


+def function_to_tool_definition(func):
+    type_hints = get_type_hints(func)
+    signature = inspect.signature(func)
+    parameters = {"type": "object", "properties": {}, "required": []}
+
+    for name, param in signature.parameters.items():
+        param_type = type_hints.get(name, str)
+        param_type_name = "string"
+        if param_type == int:
+            param_type_name = "integer"
+        elif param_type == float:
+            param_type_name = "number"
+        elif param_type == bool:
+            param_type_name = "boolean"
+
+        parameters["properties"][name] = {"type": param_type_name}
+        if param.default == inspect.Parameter.empty:
+            parameters["required"].append(name)
+
+    return {
+        "type": "function",
+        "function": {
+            "name": func.__name__,
+            "description": func.__doc__ or "",
+            "parameters": parameters,
+        },
+    }
+
+
 async def process_chat(chat, provider_id=None):
     if not chat:
         raise Exception("No chat provided")
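
Note on the new helper above: function_to_tool_definition derives an OpenAI-style tool definition from a function's signature, mapping int/float/bool type hints to JSON-Schema types (anything else defaults to string) and marking parameters without defaults as required. A minimal sketch of what it produces for a hypothetical get_weather function (the function and the commented output are illustrative, not part of the package):

    def get_weather(city: str, units: str = "metric") -> str:
        """Get the current weather for a city."""
        ...

    # function_to_tool_definition(get_weather) would return:
    # {"type": "function",
    #  "function": {
    #      "name": "get_weather",
    #      "description": "Get the current weather for a city.",
    #      "parameters": {"type": "object",
    #                     "properties": {"city": {"type": "string"},
    #                                    "units": {"type": "string"}},
    #                     "required": ["city"]}}}
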
@@ -1034,8 +1065,76 @@ async def chat_completion(chat):
     provider = g_handlers[name]
     _log(f"provider: {name} {type(provider).__name__}")
     try:
-        response = await provider.chat(chat.copy())
-        return response
+        # Inject global tools if present
+        current_chat = chat.copy()
+        if g_app.tool_definitions:
+            include_all_tools = False
+            only_tools = []
+            if "metadata" in chat:
+                only_tools = [t for t in chat["metadata"].get("only_tools", "").split(",") if t]
+                include_all_tools = "all" in only_tools
+
+            if include_all_tools or len(only_tools) > 0:
+                if "tools" not in current_chat:
+                    current_chat["tools"] = []
+
+                existing_tools = {t["function"]["name"] for t in current_chat["tools"]}
+                for tool_def in g_app.tool_definitions:
+                    tool_name = tool_def["function"]["name"]
+                    if tool_name not in existing_tools and (include_all_tools or tool_name in only_tools):
+                        current_chat["tools"].append(tool_def)
+
+        # Tool execution loop
+        max_iterations = 5
+        tool_history = []
+        for _ in range(max_iterations):
+            response = await provider.chat(current_chat)
+
+            # Check for tool_calls in the response
+            choice = response.get("choices", [])[0] if response.get("choices") else {}
+            message = choice.get("message", {})
+            tool_calls = message.get("tool_calls")
+
+            if tool_calls:
+                # Append the assistant's message with tool calls to history
+                if "messages" not in current_chat:
+                    current_chat["messages"] = []
+                current_chat["messages"].append(message)
+                tool_history.append(message)
+
+                for tool_call in tool_calls:
+                    function_name = tool_call["function"]["name"]
+                    function_args = json.loads(tool_call["function"]["arguments"])
+
+                    tool_result = f"Error: Tool {function_name} not found"
+                    if function_name in g_app.tools:
+                        try:
+                            func = g_app.tools[function_name]
+                            if inspect.iscoroutinefunction(func):
+                                tool_result = await func(**function_args)
+                            else:
+                                tool_result = func(**function_args)
+                        except Exception as e:
+                            tool_result = f"Error executing tool {function_name}: {e}"
+
+                    # Append tool result to history
+                    tool_msg = {"role": "tool", "tool_call_id": tool_call["id"], "content": str(tool_result)}
+                    current_chat["messages"].append(tool_msg)
+                    tool_history.append(tool_msg)
+
+                # Continue loop to send tool results back to LLM
+                continue
+
+            # If no tool calls, return the response
+            if tool_history:
+                response["tool_history"] = tool_history
+            return response
+
+        # Max iterations reached with outstanding tool calls: return the last response
+        if tool_history:
+            response["tool_history"] = tool_history
+        return response
+
     except Exception as e:
         if first_exception is None:
             first_exception = e
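
The block above makes tool injection opt-in per request: chat["metadata"]["only_tools"] is either "all" or a comma-separated whitelist of registered tool names, and any executed calls and their results are echoed back under response["tool_history"]. A sketch of a request that enables a single tool, assuming a tool named get_weather has been registered (tool and model names are illustrative):

    # inside an async context
    chat = {
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "What's the weather in Austin?"}],
        # "all" would inject every registered tool definition instead
        "metadata": {"only_tools": "get_weather"},
    }
    response = await chat_completion(chat)
    # response["tool_history"] holds the assistant tool_calls and tool results, if any
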
@@ -1121,15 +1217,23 @@ async def cli_chat(chat, image=None, audio=None, file=None, args=None, raw=False
     printdump(chat)

     try:
+        # Apply pre-chat filters
+        context = {"chat": chat}
+        for filter_func in g_app.chat_request_filters:
+            chat = await filter_func(chat, context)
+
         response = await chat_completion(chat)
+
+        # Apply post-chat filters
+        for filter_func in g_app.chat_response_filters:
+            response = await filter_func(response, context)
         if raw:
             print(json.dumps(response, indent=2))
             exit(0)
         else:
             msg = response["choices"][0]["message"]
-            if "answer" in msg:
-                answer = msg["content"]
-                print(answer)
+            if "content" in msg or "answer" in msg:
+                print(msg.get("content", msg.get("answer")))

         generated_files = []
         for choice in response["choices"]:
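
cli_chat now runs the same chat_request_filters/chat_response_filters chains as the HTTP handler, so extensions see both code paths. A minimal sketch of the filter shape implied by the calls above (names and bodies are illustrative):

    async def add_system_prompt(chat, context):
        # Pre-chat filter: may mutate or replace the outgoing request
        chat.setdefault("messages", []).insert(
            0, {"role": "system", "content": "Answer concisely."})
        return chat

    async def tag_response(response, context):
        # Post-chat filter: may mutate or replace the response
        response["tagged"] = True
        return response

    g_app.chat_request_filters.append(add_system_prompt)
    g_app.chat_response_filters.append(tag_response)
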
@@ -1851,6 +1955,8 @@ class AppExtensions:
         self.chat_response_filters = []
         self.server_add_get = []
         self.server_add_post = []
+        self.tools = {}
+        self.tool_definitions = []
         self.all_providers = [
             OpenAiCompatible,
             MistralProvider,
@@ -1982,6 +2089,15 @@ class ExtensionContext:
             return session.get("userName")
         return None

+    def register_tool(self, func, tool_def=None):
+        if tool_def is None:
+            tool_def = function_to_tool_definition(func)
+
+        name = tool_def["function"]["name"]
+        self.log(f"Registered tool: {name}")
+        self.app.tools[name] = func
+        self.app.tool_definitions.append(tool_def)
+

 def load_builtin_extensions():
     providers_path = _ROOT / "providers"
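
register_tool lets an extension expose a plain or async function as a chat tool; when no explicit tool_def is passed, the definition is derived via function_to_tool_definition. A sketch of extension code, assuming ctx is the ExtensionContext an extension receives (the function itself is illustrative):

    def word_count(text: str) -> int:
        """Count the words in a piece of text."""
        return len(text.split())

    # Stores the callable in app.tools and its derived schema in app.tool_definitions
    ctx.register_tool(word_count)
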
@@ -2011,7 +2127,7 @@ def load_builtin_extensions():


 def get_extensions_path():
-    return os.path.join(Path.home(), ".llms", "extensions")
+    return os.environ.get("LLMS_EXTENSIONS_DIR", os.path.join(Path.home(), ".llms", "extensions"))


 def init_extensions(parser):
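
get_extensions_path now honors an LLMS_EXTENSIONS_DIR environment variable, falling back to ~/.llms/extensions. For example:

    import os
    os.environ["LLMS_EXTENSIONS_DIR"] = "/opt/llms/extensions"
    # get_extensions_path() -> "/opt/llms/extensions"
    os.environ.pop("LLMS_EXTENSIONS_DIR")
    # get_extensions_path() -> "<home>/.llms/extensions"
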
@@ -2569,9 +2685,7 @@ def main():
         chat = await request.json()

         # Apply pre-chat filters
-        context = {"request": request}
-        # Apply pre-chat filters
-        context = {"request": request}
+        context = {"request": request, "chat": chat}
         for filter_func in g_app.chat_request_filters:
             chat = await filter_func(chat, context)

@@ -2588,16 +2702,6 @@ def main():

     app.router.add_post("/v1/chat/completions", chat_handler)

-    async def extensions_handler(request):
-        return web.json_response(g_app.ui_extensions)
-
-    app.router.add_get("/ext", extensions_handler)
-
-    async def models_handler(request):
-        return web.json_response(get_models())
-
-    app.router.add_get("/models/list", models_handler)
-
     async def active_models_handler(request):
         return web.json_response(get_active_models())

@@ -2731,6 +2835,16 @@ def main():

     app.router.add_post("/upload", upload_handler)

+    async def extensions_handler(request):
+        return web.json_response(g_app.ui_extensions)
+
+    app.router.add_get("/ext", extensions_handler)
+
+    async def tools_handler(request):
+        return web.json_response(g_app.tool_definitions)
+
+    app.router.add_get("/ext/tools", tools_handler)
+
     async def cache_handler(request):
         path = request.match_info["tail"]
         full_path = get_cache_path(path)
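
The GET /ext and new GET /ext/tools routes registered above expose the loaded UI extensions and the registered tool definitions as JSON. A quick way to inspect the tool list, using aiohttp (already a dependency of main.py); the host and port are assumptions:

    import asyncio
    import aiohttp

    async def list_tools():
        async with aiohttp.ClientSession() as session:
            # Assumes a locally running server on port 8000
            async with session.get("http://localhost:8000/ext/tools") as resp:
                for tool in await resp.json():
                    print(tool["function"]["name"])

    asyncio.run(list_tools())
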
llms/providers-extra.json CHANGED
@@ -1,4 +1,36 @@
 {
+  "minimax": {
+    "models": {
+      "MiniMax-M2.1": {
+        "id": "MiniMax-M2.1",
+        "name": "MiniMax M2.1",
+        "family": "minimax",
+        "attachment": false,
+        "reasoning": true,
+        "tool_call": true,
+        "temperature": true,
+        "release_date": "2025-12-23",
+        "last_updated": "2025-12-23",
+        "modalities": {
+          "input": [
+            "text"
+          ],
+          "output": [
+            "text"
+          ]
+        },
+        "open_weights": true,
+        "cost": {
+          "input": 0.3,
+          "output": 1.2
+        },
+        "limit": {
+          "context": 204800,
+          "output": 128000
+        }
+      }
+    }
+  },
   "openrouter": {
     "models": {
       "google/gemini-2.5-flash-image": {